From c530a0331a70cab23334d86bb7bd842ba08646d9 Mon Sep 17 00:00:00 2001 From: Jimi Xenidis Date: Sat, 20 Jan 2007 18:57:15 -0500 Subject: [PATCH] [XEN][POWERPC] big lock to protect some TLB operations 970 requires locking around TLB operations, see code comment. Signed-off-by: Jimi Xenidis --HG-- extra : transplant_source : %0AT%1BS%0Fj%91%B0%A4%DE%25%DB%25%C0%ED3%F0w%92%02 --- xen/arch/powerpc/papr/xlate.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/xen/arch/powerpc/papr/xlate.c b/xen/arch/powerpc/papr/xlate.c index 1e95d4c879..a476691f74 100644 --- a/xen/arch/powerpc/papr/xlate.c +++ b/xen/arch/powerpc/papr/xlate.c @@ -72,6 +72,20 @@ static inline void pte_insert(union pte volatile *pte, } #endif +/* + * POWER Arch 2.03 Sec 4.12.1 (Yes 970 is one) + * + * when a tlbsync instruction has been executed by a processor in a + * given partition, a ptesync instruction must be executed by that + * processor before a tlbie or tlbsync instruction is executed by + * another processor in that partition. + * + * So for now, here is a BFLock to deal with it, the lock should be per-domain. + * + * XXX Will need to audit all tlb usage soon enough. + */ + +static DEFINE_SPINLOCK(native_tlbie_lock); static void pte_tlbie(union pte volatile *pte, ulong ptex) { ulong va; @@ -91,6 +105,7 @@ static void pte_tlbie(union pte volatile *pte, ulong ptex) va = (pi << 12) | (vsid << 28); va &= ~(0xffffULL << 48); + spin_lock(&native_tlbie_lock); #ifndef FLUSH_THE_WHOLE_THING if (pte->bits.l) { va |= (pte->bits.rpn & 1); @@ -114,7 +129,7 @@ static void pte_tlbie(union pte volatile *pte, ulong ptex) } } #endif - + spin_unlock(&native_tlbie_lock); } long pte_enter(ulong flags, ulong ptex, ulong vsid, ulong rpn) -- 2.30.2